define functions

# MLM results table function
# MLM results table function
#
# Formats the fixed effects of a fitted lmer model as a styled HTML table,
# with readable term labels, APA-style p-values, and "b [95% CI]" strings.
#
# model_data: fitted merMod / lmerModLmerTest object
# eff_size:   if TRUE, append Cohen's d effect sizes from EMAtools::lme.dscore
#             NOTE(review): lme.dscore reads the global `data` frame — this
#             assumes the model was fitted to `data`; verify before reusing
#             with models fitted to other frames (e.g. words_ratings)
# word_count: if TRUE, relabel the reference term "intercept" rather than
#             "control" (used for the word-count models, where the reference
#             level is not the control condition)
table_model = function(model_data, eff_size = TRUE, word_count = TRUE) {
  
  results = model_data %>%
    broom.mixed::tidy(conf.int = TRUE) %>%
    filter(effect == "fixed") %>%
    rename("SE" = std.error,
           "t" = statistic,
           "p" = p.value) %>%
    select(-group, -effect) %>%
    # round all numeric columns except the term labels and p (formatted
    # below); across() replaces the superseded mutate_at()
    mutate(across(-c(contains("term"), contains("p")), ~ round(.x, 2))) %>%
    # translate model term codes into readable labels
    mutate(term = gsub("article_cond", "", term),
           term = gsub("\\(Intercept\\)", "control", term),
           term = gsub("sharing_type", "sharing type", term),
           term = gsub("msg_rel_self_between", "self-relevance", term),
           term = gsub("msg_rel_social_between", "social relevance", term),
           term = gsub("grouptimed", "group (timed)", term),
           term = gsub("groupuntimed", "group (untimed)", term),
           term = gsub("contentclimate", "content (climate)", term),
           term = gsub("siteUSA", "sample (USA)", term),
           term = gsub("n_c", "word count", term),
           term = gsub(":", " x ", term),
           # APA-style p-values: "< .001" floor, drop the leading zero
           # (anchored "^0\\." instead of the unescaped "0.(.*)")
           p = ifelse(p < .001, "< .001",
                      ifelse(p == 1, "1.000", gsub("^0\\.", ".", sprintf("%.3f", p)))),
           `b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", estimate, conf.low, conf.high)) 
  
  if (word_count == TRUE) {
    results = results %>%
      mutate(term = gsub("control", "intercept", term))
  }
  
  if (eff_size == TRUE) {
    # use a distinct name rather than overwriting the eff_size argument
    d_table = lme.dscore(model_data, data = data, type = "lme4") %>%
      rownames_to_column(var = "term") %>%
      mutate(term = gsub("article_cond", "", term),
             term = gsub("\\(Intercept\\)", "control", term),
             term = gsub("sharing_type", "sharing type", term),
             term = gsub("msg_rel_self_between", "self-relevance", term),
             term = gsub("msg_rel_social_between", "social relevance", term),
             term = gsub("contentclimate", "content (climate)", term),
             term = gsub(":", " x ", term),
             d = sprintf("%.2f", d)) %>%
      select(term, d)
    
    results %>%
      left_join(., d_table, by = "term") %>%
      # terms with no effect size (e.g. the intercept) get a placeholder
      mutate(d = ifelse(is.na(d), "--", d)) %>%
      select(term, `b [95% CI]`, d, df, t, p) %>%
      kable() %>%
      kableExtra::kable_styling()
    
  } else {
    results %>%
      select(term, `b [95% CI]`, df, t, p) %>%
      kable() %>%
      kableExtra::kable_styling()
  }
}

# simple effects function
# simple effects function
#
# Computes condition-vs-control contrasts within each group (and, optionally,
# within each sharing type) via emmeans and renders them as a styled table.
#
# model:   fitted lmer model containing article_cond and group terms
# sharing: if TRUE, also condition on sharing_type (coded 0 = broadcast,
#          1 = narrowcast) and keep only same-sharing-type contrasts
simple_effects = function(model, sharing = FALSE) {
  if (sharing == FALSE) {
    results = emmeans::contrast(emmeans::emmeans(model, ~ article_cond | group),
                            "revpairwise", by = "group", adjust = "none") %>%
      data.frame() %>%
      filter(grepl("control", contrast)) %>%
      select(contrast, group, estimate, p.value)
  } else {
    results = emmeans::contrast(emmeans::emmeans(model, ~ article_cond | group + sharing_type),
                            "revpairwise", by = "group", adjust = "none") %>%
      data.frame() %>%
      # keep only "experimental - control" contrasts
      filter(grepl("- control", contrast)) %>%
      filter(!grepl("^control", contrast)) %>%
      # pull the sharing-type codes out of the contrast labels
      # (namespaced to tidyr to avoid ambiguity with magrittr::extract)
      tidyr::extract(contrast, c("exp_sharing", "control_sharing"), ".* (0|1) - control (0|1)", remove = FALSE) %>%
      # only compare conditions within the same sharing type
      filter(exp_sharing == control_sharing) %>%
      mutate(sharing_type = ifelse(exp_sharing == 0, "broadcast", "narrowcast"),
             contrast = gsub("0|1", "", contrast)) %>%
      select(contrast, sharing_type, group, estimate, p.value)
  }
  
  results %>%
    # APA-style p-values: "< .001" floor, drop the leading zero
    # (anchored "^0\\." instead of the unescaped "0.(.*)")
    mutate(p.value = ifelse(p.value < .001, "< .001",
                      ifelse(p.value == 1, "1.000", gsub("^0\\.", ".", sprintf("%.3f", p.value))))) %>%
    kable(digits = 2) %>%
    kableExtra::kable_styling()
}

prep data

First, we load the relevant packages and data, and define the plotting aesthetics.

load packages

# bootstrap pacman if it is missing, then use it to install/load everything
if(!require('pacman')) {
    install.packages('pacman')
}

pacman::p_load(tidyverse, knitr, kableExtra, lmerTest, boot, report, brms, tidybayes, ggpubr, EMAtools, broom.mixed)

define aesthetics

# color palettes: experimental condition, dependent variable, sharing type
palette_condition = c("#ee9b00", "#bb3e03", "#005f73")
palette_dv = c("#ee9b00", "#005f73", "#56282D")
palette_sharing = c("#0a9396", "#ee9b00")

# shared ggplot theme applied to every figure
# NOTE(review): requires the "Futura Medium" font to be installed locally
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank())

load data

# trial-level ratings; relabel the "social" condition as "other"
data = read.csv("../data/study2.csv", stringsAsFactors = FALSE) %>%
  mutate(article_cond = ifelse(article_cond == "social", "other", article_cond))

# per-message word counts; apply the same condition relabeling
n_words = read.csv("../data/study2_n_words.csv", stringsAsFactors = FALSE) %>%
  mutate(article_cond = ifelse(article_cond == "social", "other", article_cond))

group ns

Not sure why we ended up with fewer people in the comment condition. Let’s look into that.

# participant counts per group: reduce to one row per participant, then tally
# (distinct() + count() replace select/unique/group_by/summarize)
data %>%
  distinct(group, SID) %>%
  count(group, name = "n") %>%
  kable() %>%
  kable_styling()
group n
comment 131
timed 159
untimed 169

manipulation checks

self-relevance

Test whether messages in the self condition will be rated as more self-relevant than messages in the control condition as a function of group.

Results

✅ We replicate our previous work in the comment group: the self condition increases self-relevance compared to the control

✅ This effect is smaller for both the timed and untimed groups

# H1: self-relevance ~ condition x group, narrowcast trials only
# (sharing_type == 1), with by-participant intercepts and condition slopes
mod_h1 = lmer(msg_rel_self ~ 1 + article_cond * group + (1 + article_cond | SID),
              data = filter(data, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

model summary table

table_model(mod_h1, eff_size = FALSE)
term b [95% CI] df t p
intercept 49.62 [45.38, 53.86] 451.69 23.01 < .001
other 4.05 [0.54, 7.57] 447.10 2.26 .024
self 14.61 [10.70, 18.53] 445.68 7.34 < .001
group (timed) -3.13 [-8.81, 2.54] 449.32 -1.09 .278
group (untimed) -4.53 [-10.13, 1.07] 450.27 -1.59 .113
other x group (timed) -5.71 [-10.41, -1.00] 445.43 -2.38 .018
self x group (timed) -9.94 [-15.16, -4.71] 445.32 -3.74 < .001
other x group (untimed) -3.33 [-7.98, 1.31] 445.67 -1.41 .159
self x group (untimed) -11.88 [-17.04, -6.72] 445.70 -4.52 < .001

simple effects by group

simple_effects(mod_h1)
contrast group estimate p.value
other - control comment 4.05 .024
self - control comment 14.61 < .001
other - control timed -1.65 .298
self - control timed 4.68 .008
other - control untimed 0.72 .642
self - control untimed 2.73 .111

social relevance

Test whether messages in the social condition will be rated as more socially relevant than messages in the control condition as a function of group.

Results

✅ We replicate our previous work in the comment group: the social condition increases social relevance compared to the control

✅ This effect is smaller for both the timed and untimed groups

# H2: social relevance ~ condition x group, narrowcast trials only
# (sharing_type == 1), with by-participant intercepts and condition slopes
mod_h2 = lmer(msg_rel_social ~ 1 + article_cond * group + (1 + article_cond | SID),
              data = filter(data, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

model summary table

table_model(mod_h2, eff_size = FALSE)
term b [95% CI] df t p
intercept 53.33 [49.01, 57.64] 453.40 24.28 < .001
other 14.90 [11.41, 18.38] 447.67 8.40 < .001
self 10.49 [6.93, 14.04] 444.92 5.80 < .001
group (timed) -2.62 [-8.40, 3.16] 450.98 -0.89 .373
group (untimed) -4.06 [-9.76, 1.65] 452.03 -1.40 .163
other x group (timed) -14.43 [-19.09, -9.77] 445.92 -6.09 < .001
self x group (timed) -7.44 [-12.19, -2.70] 444.54 -3.08 .002
other x group (untimed) -10.72 [-15.32, -6.12] 446.17 -4.58 < .001
self x group (untimed) -8.05 [-12.75, -3.36] 444.82 -3.38 < .001

simple effects by group

simple_effects(mod_h2)
contrast group estimate p.value
other - control comment 14.90 < .001
self - control comment 10.49 < .001
other - control timed 0.47 .765
self - control timed 3.04 .057
other - control untimed 4.18 .006
self - control untimed 2.43 .118

plot predicted effects

# generate predicted relevance values by condition and group for both models
predicted_h1 = ggeffects::ggpredict(mod_h1, c("article_cond", "group")) %>%
              data.frame() %>%
  mutate(model = "self-relevance")

predicted_h2 = ggeffects::ggpredict(mod_h2, c("article_cond", "group")) %>%
              data.frame() %>%
  mutate(model = "social relevance")

# manipulation check plot: predicted relevance with 95% CIs, faceted by model
bind_rows(predicted_h1, predicted_h2) %>%
  mutate(x = factor(x, levels = c("self", "control", "other"))) %>%
  ggplot(aes(x = group, y = predicted, color = x)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high), position = position_dodge(.5), size = 1) +
  facet_grid(~ model) +
  coord_flip() +
  scale_color_manual(name = "", values = palette_condition) +
  labs(x = "", y = "\nmean predicted relevance rating") +
  plot_aes +
  theme(legend.position = "top")

condition effects narrowcasting only

Test whether messages in the experimental conditions will evoke higher sharing intentions than messages in the control condition, and whether this is moderated by sharing type as a function of group.

Results

✅ We replicate our previous work in the comment group: the self and social conditions increase sharing intentions compared to the control, and these effects are stronger for narrowcast compared to broadcasting sharing intentions

✅ These effects were smaller for both the timed and untimed groups

# H3: sharing intentions ~ condition x group, narrowcast trials only
# (sharing_type == 1), with by-participant intercepts and condition slopes
mod_h3 = lmer(msg_share ~ 1 + article_cond*group + (1 + article_cond | SID),
              data = filter(data, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# generate predicted sharing values by condition and group
predicted_h3 = ggeffects::ggpredict(mod_h3, c("article_cond", "group")) %>%
              data.frame() %>%
  mutate(model = "sharing")

# causal analysis plot: predicted sharing intentions with 95% CIs
predicted_h3 %>%
  mutate(x = factor(x, levels = c("self", "control", "other"))) %>%
  ggplot(aes(x = group, y = predicted, color = x)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high), position = position_dodge(.5), size = 1) +
  coord_flip() +
  scale_color_manual(name = "", values = palette_condition) +
  labs(x = "", y = "\nmean predicted sharing intention rating") +
  plot_aes +
  theme(legend.position = "top")

model summary table

table_model(mod_h3, eff_size = FALSE)
term b [95% CI] df t p
intercept 32.44 [27.76, 37.13] 453.83 13.62 < .001
other 14.74 [11.40, 18.07] 448.37 8.69 < .001
self 9.54 [6.31, 12.78] 445.40 5.80 < .001
group (timed) 1.10 [-5.18, 7.37] 451.38 0.34 .731
group (untimed) 0.01 [-6.18, 6.20] 452.53 0.00 .997
other x group (timed) -13.78 [-18.24, -9.32] 446.43 -6.07 < .001
self x group (timed) -8.49 [-12.81, -4.17] 445.02 -3.86 < .001
other x group (untimed) -13.46 [-17.87, -9.05] 446.74 -6.00 < .001
self x group (untimed) -9.37 [-13.64, -5.10] 445.28 -4.32 < .001

simple effects by group

simple_effects(mod_h3, sharing = FALSE)
contrast group estimate p.value
other - control comment 14.74 < .001
self - control comment 9.54 < .001
other - control timed 0.96 .524
self - control timed 1.05 .471
other - control untimed 1.28 .382
self - control untimed 0.17 .904

combined plot

# combined plot: predicted values from all three models, with the reflect
# groups relabeled for display
bind_rows(predicted_h1, predicted_h2, predicted_h3) %>%
  mutate(model = factor(model, levels = c("self-relevance", "social relevance", "sharing")),
         x = factor(x, levels = c("self", "control", "other")),
         group = ifelse(group == "timed", "reflect:\ntimed",
                 ifelse(group == "untimed", "reflect:\nuntimed", "comment")),
         group = factor(group, levels = c("reflect:\ntimed", "reflect:\nuntimed", "comment"))) %>%
  ggplot(aes(x = group, y = predicted, color = x)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high), position = position_dodge(.5), size = 1.5) +
  facet_grid(~ model) +
  coord_flip() +
  scale_color_manual(name = "", values = palette_condition) +
  labs(x = "", y = "\npredicted rating") +
  plot_aes +
  theme(legend.position = "top")

condition effects by sharing type

Test whether messages in the experimental conditions will evoke higher sharing intentions than messages in the control condition, and whether this is moderated by sharing type as a function of group.

Results

✅ We replicate our previous work in the comment group: the self and social conditions increase sharing intentions compared to the control, and these effects are stronger for narrowcast compared to broadcasting sharing intentions

✅ These effects were smaller for both the timed and untimed groups

# H3/H4: sharing intentions ~ condition x sharing type x group, all trials,
# with by-participant intercepts and sharing-type slopes
mod_h3_h4 = lmer(msg_share ~ 1 + article_cond*sharing_type*group + (1 + sharing_type | SID),
              data = data,
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# generate predicted values; recode ggeffects columns for display
# (sharing_type: 0 = broadcast, 1 = narrowcast; reflect groups relabeled)
predicted_h3_h4 = ggeffects::ggpredict(mod_h3_h4, c("article_cond", "sharing_type", "group")) %>%
              data.frame() %>%
  mutate(group = ifelse(group == "0", "broadcast sharing", "narrowcast sharing"),
         facet = ifelse(grepl("time", facet), sprintf("reflect:\n%s", facet), "comment"),
         facet = factor(facet, levels = c("reflect:\ntimed", "reflect:\nuntimed", "comment")))

# causal analysis plot: predicted sharing intentions faceted by sharing type
predicted_h3_h4 %>%
  mutate(group = gsub(" sharing", "", group)) %>%
  ggplot(aes(x = facet, y = predicted, color = x)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high), position = position_dodge(.5), size = 1) +
  facet_grid(~ group) +
  coord_flip() +
  scale_color_manual(name = "", values = palette_condition) +
  labs(x = "", y = "\nmean predicted sharing intention rating") +
  plot_aes +
  theme(legend.position = "top")

model summary table

table_model(mod_h3_h4, eff_size = FALSE)
term b [95% CI] df t p
intercept 24.65 [19.85, 29.45] 559.85 10.09 < .001
other 6.47 [3.87, 9.08] 9875.19 4.87 < .001
self 5.42 [2.81, 8.02] 9846.67 4.07 < .001
sharing type 7.78 [4.00, 11.57] 973.04 4.03 < .001
group (timed) 1.87 [-4.56, 8.30] 556.66 0.57 .568
group (untimed) 1.60 [-4.74, 7.95] 558.28 0.50 .620
other x sharing type 8.29 [4.61, 11.97] 9892.38 4.42 < .001
self x sharing type 4.14 [0.45, 7.82] 9851.42 2.20 .028
other x group (timed) -4.80 [-8.29, -1.32] 9860.03 -2.71 .007
self x group (timed) -5.25 [-8.73, -1.77] 9848.22 -2.95 .003
other x group (untimed) -5.33 [-8.77, -1.89] 9861.83 -3.04 .002
self x group (untimed) -4.29 [-7.73, -0.85] 9852.85 -2.45 .014
sharing type x group (timed) -0.75 [-5.81, 4.32] 963.48 -0.29 .773
sharing type x group (untimed) -1.63 [-6.64, 3.37] 967.36 -0.64 .521
other x sharing type x group (timed) -9.00 [-13.93, -4.08] 9869.82 -3.59 < .001
self x sharing type x group (timed) -3.26 [-8.19, 1.66] 9853.18 -1.30 .194
other x sharing type x group (untimed) -8.12 [-12.99, -3.26] 9871.90 -3.28 .001
self x sharing type x group (untimed) -5.07 [-9.93, -0.20] 9860.72 -2.04 .041

simple effects by group

simple_effects(mod_h3_h4, sharing = TRUE)
contrast sharing_type group estimate p.value
other - control broadcast comment 6.47 < .001
self - control broadcast comment 5.42 < .001
other - control narrowcast comment 14.77 < .001
self - control narrowcast comment 9.55 < .001
other - control broadcast timed 1.67 .156
self - control broadcast timed 0.17 .887
other - control narrowcast timed 0.96 .416
self - control narrowcast timed 1.04 .378
other - control broadcast untimed 1.14 .318
self - control broadcast untimed 1.12 .326
other - control narrowcast untimed 1.31 .252
self - control narrowcast untimed 0.19 .868

word count analyses

Test whether word count is higher in the experimental conditions, and whether it is positively associated with self and social relevance, and sharing intention ratings.

summarize

# merge word counts with the ratings data and grand-mean-center word count
words_ratings = n_words %>%
  left_join(., data) %>%
  ungroup() %>%
  mutate(n_c = n - mean(n, na.rm = TRUE))

# descriptive word-count statistics by condition
n_words %>%
  group_by(article_cond) %>%
  summarize(mean = mean(n, na.rm = TRUE),
            sd = sd(n, na.rm = TRUE),
            min = min(n, na.rm = TRUE),
            max = max(n, na.rm = TRUE)) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
article_cond mean sd min max
control 13.77 7.33 3 72
other 17.17 9.32 3 69
self 18.14 10.43 3 72

condition model

Is word count higher in the experimental conditions compared to the control condition?

Results

✅ The word count is higher in the experimental conditions compared to the control condition

# word count ~ condition, with by-participant intercepts and condition slopes
mod_words = lmer(n ~ 1 + article_cond + (1 + article_cond | SID),
              data = n_words,
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# predicted word count by condition with 95% CIs
predicted = ggeffects::ggpredict(mod_words, c("article_cond")) %>%
              data.frame()

predicted %>%
  ggplot(aes(x = "", y = predicted, color = x)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high), position = position_dodge(.5), size = 1) +
  coord_flip() +
  scale_color_manual(name = "", values = palette_condition) +
  labs(x = "", y = "\nmean predicted word count") +
  plot_aes +
  theme(legend.position = "top")

model summary

table_model(mod_words, eff_size = FALSE)
term b [95% CI] df t p
intercept 13.76 [12.74, 14.79] 126.25 26.56 < .001
other 3.38 [2.33, 4.43] 124.53 6.38 < .001
self 4.33 [3.15, 5.52] 124.73 7.23 < .001

relevance models

Is word count positively associated with self and social relevance ratings?

self-relevance

Results

✅ Word count is positively associated with self-relevance ratings

# self-relevance ~ centered word count, narrowcast trials only,
# with by-participant intercepts and word-count slopes
mod_words_h1 = lmer(msg_rel_self ~ 1 + n_c + (1 + n_c | SID),
              data = filter(words_ratings, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# predicted self-relevance across the centered word-count range
values = seq(-15, 60, 10)
predicted_self = ggeffects::ggpredict(mod_words_h1, terms = "n_c [values]") %>%
  data.frame()

predicted_self %>%
  ggplot(aes(x, predicted)) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) +
  geom_line(size = 1) +
  coord_cartesian(ylim = c(40, 110)) +
  labs(x = "\nword count (grand mean-centered)", y = "predicted self-relevance rating\n") +
  plot_aes

model summary

table_model(mod_words_h1, eff_size = FALSE, word_count = TRUE)
term b [95% CI] df t p
intercept 55.77 [51.77, 59.78] 126.45 27.55 < .001
word count 0.49 [0.24, 0.74] 56.60 3.97 < .001

social relevance

Results

✅ Word count is positively associated with social relevance ratings

# social relevance ~ centered word count, narrowcast trials only,
# with by-participant intercepts and word-count slopes
mod_words_h2 = lmer(msg_rel_social ~ 1 + n_c + (1 + n_c | SID),
              data = filter(words_ratings, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# predicted social relevance across the centered word-count range
values = seq(-15, 60, 10)
predicted_social = ggeffects::ggpredict(mod_words_h2, terms = "n_c [values]") %>%
  data.frame()

predicted_social %>%
  ggplot(aes(x, predicted)) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) +
  geom_line(size = 1) +
  coord_cartesian(ylim = c(40, 105)) +
  labs(x = "\nword count (grand mean-centered)", y = "predicted social relevance rating\n") +
  plot_aes

model summary

table_model(mod_words_h2, eff_size = FALSE, word_count = TRUE)
term b [95% CI] df t p
intercept 61.98 [57.89, 66.07] 126.76 30.01 < .001
word count 0.46 [0.23, 0.69] 77.46 4.04 < .001

combined plot

# raw ratings reshaped to long format so they can be overlaid as points
# (pivot_longer replaces the superseded gather)
data_raw = words_ratings %>%
  filter(sharing_type == 1) %>%
  select(SID, n_c, msg_rel_self, msg_rel_social) %>%
  pivot_longer(contains("msg"), names_to = "group", values_to = "predicted") %>%
  rename("x" = n_c) %>%
  mutate(group = ifelse(group == "msg_rel_self", "self","social"),
         group = factor(group, levels = c("self", "social")))
  
# combined plot: predicted relevance curves with raw ratings overlaid
predicted_self %>%
  mutate(group = "self") %>%
  bind_rows(., predicted_social %>%  mutate(group = "social")) %>%
  mutate(group = factor(group, levels = c("self", "social"))) %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  geom_point(data = data_raw, aes(x, predicted, color = group, fill = group), alpha = .25) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25, color = NA) +
  geom_line(size = 2) +
  scale_x_continuous(breaks = seq(-10, 60, 10)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  scale_color_manual(values = c(palette_condition[1], palette_condition[3]), name = "") + 
  scale_fill_manual(values = c(palette_condition[1], palette_condition[3]), name = "") + 
  labs(x = "\nword count (grand mean-centered)", y = "predicted relevance rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .21))

sharing models

Is word count positively associated with sharing intention ratings?

Results

✅ Word count is positively associated with sharing intentions (averaging across sharing types), but doesn’t differ by sharing type

# sharing intentions ~ centered word count, narrowcast trials only,
# with by-participant intercepts and word-count slopes
mod_words_h3 = lmer(msg_share ~ 1 + n_c + (1 + n_c | SID),
              data = filter(words_ratings, sharing_type == 1),
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# predicted sharing intentions across the centered word-count range
values = seq(-15, 60, 10)
predicted_sharing = ggeffects::ggpredict(mod_words_h3, terms = c("n_c [values]")) %>%
  data.frame()

predicted_sharing %>%
  ggplot(aes(x, predicted)) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25, color = NA) +
  geom_line(size = 1) +
  scale_color_manual(values = palette_sharing, name = "") +
  scale_fill_manual(values = palette_sharing, name = "") +
  labs(x = "\nword count (grand mean-centered)", y = "predicted sharing intention rating\n") +
  plot_aes

model summary

table_model(mod_words_h3, eff_size = FALSE, word_count = TRUE)
term b [95% CI] df t p
intercept 40.89 [36.25, 45.53] 126.13 17.43 < .001
word count 0.33 [0.07, 0.59] 52.69 2.56 .013

combined plot

# raw ratings for all three DVs reshaped to long format for the overlay
# (pivot_longer replaces the superseded gather)
data_raw = words_ratings %>%
  filter(sharing_type == 1) %>%
  select(SID, n_c, msg_rel_self, msg_rel_social, msg_share) %>%
  pivot_longer(contains("msg"), names_to = "group", values_to = "predicted") %>%
  rename("x" = n_c) %>%
  mutate(group = ifelse(group == "msg_rel_self", "self-relevance",
                 ifelse(group == "msg_rel_social", "social relevance", "sharing")),
         group = factor(group, levels = c("self-relevance", "social relevance", "sharing")))
  
# combined plot: predicted curves for all three DVs with raw data overlaid
predicted_self %>%
  mutate(group = "self-relevance") %>%
  bind_rows(., predicted_social %>%  mutate(group = "social relevance")) %>%
  bind_rows(., predicted_sharing %>%  mutate(group = "sharing")) %>%
  mutate(group = factor(group, levels = c("self-relevance", "social relevance", "sharing"))) %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  geom_point(data = data_raw, aes(x, predicted, color = group, fill = group), alpha = .25) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25, color = NA) +
  geom_line(size = 2) +
  scale_x_continuous(breaks = seq(-10, 60, 10)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  scale_color_manual(values = palette_dv, name = "") + 
  scale_fill_manual(values = palette_dv, name = "") + 
  labs(x = "\nword count (grand mean-centered)", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .21))

sharing models by sharing type

Is word count positively associated with sharing intention ratings?

Results

✅ Word count is positively associated with sharing intentions (averaging across sharing types), but doesn’t differ by sharing type

# sharing intentions ~ centered word count x sharing type, all trials
# NOTE(review): reuses the name mod_words_h3, overwriting the
# narrowcast-only model fitted above
mod_words_h3 = lmer(msg_share ~ 1 + n_c * sharing_type + (1 + n_c | SID),
              data = words_ratings,
              control = lmerControl(optimizer = "bobyqa"))

plot predicted effects

# predicted sharing intentions by word count and sharing type
# (sharing_type: 0 = broadcast, 1 = narrowcast)
values = seq(-20, 60, 10)
predicted = ggeffects::ggpredict(mod_words_h3, terms = c("n_c [values]", "sharing_type")) %>%
  data.frame() %>%
  mutate(group = ifelse(group == "0", "broadcast sharing", "narrowcast sharing"))

predicted %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25, color = NA) +
  geom_line(size = 1) +
  scale_color_manual(values = palette_sharing, name = "") +
  scale_fill_manual(values = palette_sharing, name = "") +
  labs(x = "\nword count (grand mean-centered)", y = "predicted sharing intention rating\n") +
  plot_aes

model summary

table_model(mod_words_h3, eff_size = FALSE, word_count = TRUE)
term b [95% CI] df t p
intercept 28.77 [24.23, 33.32] 135.12 12.53 < .001
word count 0.21 [0.01, 0.41] 86.66 2.05 .043
sharing type 12.09 [10.47, 13.70] 2719.42 14.68 < .001
word count x sharing type 0.07 [-0.10, 0.25] 2719.42 0.83 .408

moderation by article topic comment group only

These analyses explore whether the analyses reported in study 2 of the main manuscript are moderated by article topic (health or climate).

# subset to the comment group for the topic-moderation analyses
data_comment = data %>%
  filter(group == "comment") %>%
  select(-group)

H2

Are the effects of the experimental manipulations on relevance moderated by article topic?

self-relevance

There is a main effect of topic such that health articles are rated as more self-relevant than climate articles.

The was also an interaction such that the effect of the self-focused condition on self-relevance was weaker for health articles.

# self-relevance ~ condition x topic, comment group, broadcast trials only
# (sharing_type == 0), with by-participant intercepts
mod_h2a = lmer(msg_rel_self ~ article_cond * topic + (1 | SID),
               data = filter(data_comment, sharing_type == 0), 
              control = lmerControl(optimizer = "bobyqa"))

model table

# fixed effects table with effect sizes (eff_size defaults to TRUE)
table_h2a = table_model(mod_h2a)

table_h2a
term b [95% CI] d df t p
intercept 50.28 [45.29, 55.27] 303.98 19.83 < .001
other 7.15 [2.37, 11.92] 0.04 1383.38 2.93 .003
self 13.65 [8.93, 18.37] 0.14 1375.83 5.67 < .001
topichealth -1.50 [-6.32, 3.33] 0.06 1381.50 -0.61 .543
other x topichealth -5.77 [-12.66, 1.13] -0.03 1393.15 -1.64 .101
self x topichealth 2.00 [-4.90, 8.89] 0.00 1390.12 0.57 .570

summary

summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_rel_self ~ article_cond * topic + (1 | SID)
##    Data: filter(data_comment, sharing_type == 0)
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14244.9
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.11437 -0.62440  0.09301  0.68148  2.67978 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 453.9    21.30   
##  Residual             699.9    26.46   
## Number of obs: 1491, groups:  SID, 127
## 
## Fixed effects:
##                               Estimate Std. Error       df t value
## (Intercept)                     50.283      2.535  303.976  19.833
## article_condother                7.146      2.435 1383.378   2.935
## article_condself                13.650      2.406 1375.831   5.674
## topichealth                     -1.495      2.459 1381.502  -0.608
## article_condother:topichealth   -5.766      3.513 1393.147  -1.641
## article_condself:topichealth     1.998      3.516 1390.121   0.568
##                                           Pr(>|t|)    
## (Intercept)                   < 0.0000000000000002 ***
## article_condother                          0.00339 ** 
## article_condself                       0.000000017 ***
## topichealth                                0.54328    
## article_condother:topichealth              0.10096    
## article_condself:topichealth               0.57002    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) artcl_cndt artcl_cnds tpchlt artcl_cndt:
## artcl_cndth -0.469                                         
## artcl_cndsl -0.468  0.488                                  
## topichealth -0.465  0.500      0.504                       
## artcl_cndt:  0.338 -0.722     -0.354     -0.721            
## artcl_cnds:  0.334 -0.348     -0.714     -0.719  0.504

social relevance

There is a main effect of topic such that health articles are rated as more socially relevant than climate articles.

These data are not consistent with moderation by topic.

# social relevance ~ condition x topic, comment group, broadcast trials only
# (sharing_type == 0), with by-participant intercepts
mod_h2b = lmer(msg_rel_social ~ article_cond * topic + (1 | SID),
               data = filter(data_comment, sharing_type == 0), 
              control = lmerControl(optimizer = "bobyqa"))

model table

# fixed effects table with effect sizes (eff_size defaults to TRUE)
table_h2b = table_model(mod_h2b)

table_h2b
term b [95% CI] d df t p
intercept 49.99 [45.14, 54.84] 254.66 20.30 < .001
other 16.67 [12.45, 20.88] 0.16 1378.20 7.76 < .001
self 13.37 [9.20, 17.53] 0.14 1371.74 6.30 < .001
topichealth 6.93 [2.67, 11.19] 0.21 1376.05 3.19 .001
other x topichealth -3.93 [-10.01, 2.16] -0.03 1385.24 -1.27 .206
self x topichealth -6.04 [-12.13, 0.05] -0.04 1382.65 -1.95 .052

summary

summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_rel_social ~ article_cond * topic + (1 | SID)
##    Data: filter(data_comment, sharing_type == 0)
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 13906.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3910 -0.4875  0.1093  0.6262  2.8529 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 487.8    22.09   
##  Residual             543.7    23.32   
## Number of obs: 1491, groups:  SID, 127
## 
## Fixed effects:
##                               Estimate Std. Error       df t value
## (Intercept)                     49.994      2.463  254.657  20.302
## article_condother               16.669      2.149 1378.199   7.758
## article_condself                13.367      2.122 1371.738   6.300
## topichealth                      6.931      2.170 1376.053   3.195
## article_condother:topichealth   -3.926      3.101 1385.236  -1.266
## article_condself:topichealth    -6.040      3.103 1382.651  -1.947
##                                           Pr(>|t|)    
## (Intercept)                   < 0.0000000000000002 ***
## article_condother               0.0000000000000167 ***
## article_condself                0.0000000004004002 ***
## topichealth                                0.00143 ** 
## article_condother:topichealth              0.20576    
## article_condself:topichealth               0.05179 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) artcl_cndt artcl_cnds tpchlt artcl_cndt:
## artcl_cndth -0.426                                         
## artcl_cndsl -0.425  0.488                                  
## topichealth -0.423  0.500      0.505                       
## artcl_cndt:  0.307 -0.723     -0.354     -0.721            
## artcl_cnds:  0.303 -0.348     -0.714     -0.719  0.504

combined plot

# predicted condition effects by topic for both relevance outcomes
# NOTE(review): this overwrites the predicted_h2 object created in the
# manipulation-check section above
predicted_h2 = ggeffects::ggpredict(mod_h2a, c("article_cond", "topic")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("article_cond", "topic")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# raw ratings in long format (pivot_longer replaces the superseded gather)
ind_data_h2 = data_comment %>%
  rename("x" = article_cond,
         "group" = topic) %>%
  pivot_longer(c(msg_rel_self, msg_rel_social), names_to = "model", values_to = "predicted") %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         model = gsub("msg_rel_self", "self-relevance", model),
         model = gsub("msg_rel_social", "social relevance", model))
  
# plot predicted ratings by condition, topic (linetype), and model (facets)
(plot_h2 = predicted_h2 %>%
  ggplot(aes(x = x, y = predicted)) +
  # stat_summary(data = ind_data_h2, aes(group = SID, linetype = group), fun = "mean", geom = "line",
  #              size = .1, color = "grey50") +
  stat_summary(aes(group = group, linetype = group),
               fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_linetype_manual(name = "", values = c("solid", "dotted")) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15)))

H3

Are the relationships between self and social relevance and sharing intentions moderated by article topic?

The relationship between self-relevance and sharing intentions was not moderated by topic.

However, the relationship between social relevance and sharing intentions was slightly stronger for health articles compared to climate articles.

# H3: does article topic moderate the associations of self- and social
# relevance with sharing intentions?
# Random structure: by-participant intercept and slope for self-relevance
# only (no random slope for social relevance — presumably for convergence;
# verify). bobyqa optimizer specified to aid convergence.
# NOTE(review): summary() below still warns "Model is nearly unidentifiable:
# very large eigenvalue - Rescale variables?" — consider rescaling predictors.
mod_h3 = lmer(msg_share ~ msg_rel_self * topic + msg_rel_social * topic + (1 + msg_rel_self | SID),
              data = data_comment,
              control = lmerControl(optimizer = "bobyqa"))

plot

# Predicted sharing intentions across the observed range of each relevance
# rating (by topic), stacked with a `variable` label for faceting
predicted = bind_rows(
  ggeffects::ggpredict(mod_h3, c("msg_rel_self", "topic")) %>%
    data.frame() %>%
    mutate(variable = "self-relevance"),
  ggeffects::ggpredict(mod_h3, c("msg_rel_social", "topic")) %>%
    data.frame() %>%
    mutate(variable = "social relevance")
)

# Raw participant ratings reshaped long so each relevance type can be drawn
# as a scatter layer under the H3 prediction lines; columns renamed to match
# the `predicted` data frame (x, predicted, group, variable).
# pivot_longer() replaces the superseded gather(); row order differs
# (row-major vs column-major) but point layers are order-insensitive.
# recode() does exact whole-value replacement, safer than substring gsub().
points = data_comment %>%
  rename("predicted" = msg_share,
         "group" = topic) %>%
  pivot_longer(cols = c(msg_rel_self, msg_rel_social),
               names_to = "variable", values_to = "x") %>%
  mutate(variable = recode(variable,
                           "msg_rel_self" = "self-relevance",
                           "msg_rel_social" = "social relevance"))

# H3 plot: predicted sharing intention as a function of each relevance rating,
# by topic, with raw participant ratings underneath; () assigns and prints
(plot_rel_sharing = predicted %>%
  ggplot(aes(x, predicted)) +
  # raw observations, small and translucent so the model fit stays visible
  geom_point(data = points, aes(x, predicted, color = group),
             alpha = .5, size = .25) +
  # 95% CI band around each fitted line
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = group), alpha = .2, color = NA) +
  geom_line(aes(color = group), size = 1) +
  facet_grid(~variable) +
  # scale_color_manual(name = "", values = palette_topic) +
  # scale_fill_manual(name = "", values = palette_topic) +
  labs(x = "\nrating", y = "predicted sharing intention\n") +
  plot_aes)

model table

# Tidy fixed-effects table for H3 (b [95% CI], d, df, t, p) via table_model()
table_h3 = table_model(mod_h3)

table_h3
term b [95% CI] d df t p
intercept 4.30 [0.95, 7.64] 172.83 2.54 .012
msg_rel_self 0.12 [0.05, 0.20] 0.35 533.58 3.23 .001
topichealth -1.02 [-4.39, 2.34] -0.02 2623.23 -0.60 .551
msg_rel_social 0.35 [0.29, 0.42] 0.42 2160.26 10.40 < .001
msg_rel_self x topichealth -0.01 [-0.09, 0.06] -0.01 2551.45 -0.37 .714
topichealth x msg_rel_social 0.04 [-0.04, 0.12] 0.02 2406.08 0.93 .350

summary

# full lmerTest summary for H3 (Satterthwaite degrees of freedom)
summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ msg_rel_self * topic + msg_rel_social * topic + (1 +  
##     msg_rel_self | SID)
##    Data: data_comment
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 26935.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5637 -0.5159 -0.0467  0.3751  3.7795 
## 
## Random effects:
##  Groups   Name         Variance  Std.Dev. Corr
##  SID      (Intercept)  131.15619 11.45        
##           msg_rel_self   0.04002  0.20    0.27
##  Residual              422.70870 20.56        
## Number of obs: 2982, groups:  SID, 127
## 
## Fixed effects:
##                              Estimate Std. Error         df t value
## (Intercept)                   4.29877    1.69468  172.82574   2.537
## msg_rel_self                  0.12174    0.03765  533.57860   3.234
## topichealth                  -1.02410    1.71627 2623.23157  -0.597
## msg_rel_social                0.35446    0.03410 2160.26304  10.395
## msg_rel_self:topichealth     -0.01438    0.03917 2551.44626  -0.367
## topichealth:msg_rel_social    0.03828    0.04094 2406.07685   0.935
##                                       Pr(>|t|)    
## (Intercept)                             0.0121 *  
## msg_rel_self                            0.0013 ** 
## topichealth                             0.5508    
## msg_rel_social             <0.0000000000000002 ***
## msg_rel_self:topichealth                0.7136    
## topichealth:msg_rel_social              0.3499    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) msg_rl_sl tpchlt msg_rl_sc msg__:
## msg_rel_slf -0.053                                  
## topichealth -0.471  0.093                           
## msg_rel_scl -0.308 -0.701     0.205                 
## msg_rl_slf:  0.079 -0.700    -0.116  0.672          
## tpchlth:m__  0.187  0.577    -0.434 -0.763    -0.802
## optimizer (bobyqa) convergence code: 0 (OK)
## Model is nearly unidentifiable: very large eigenvalue
##  - Rescale variables?

H5

Are the effects of the experimental manipulations on sharing intentions moderated by article topic?

There is a main effect of topic, such that health articles have higher sharing intentions than climate articles.

These data are not consistent with moderation by topic.

# H5: does article topic moderate the condition effects on sharing intentions?
# Random intercept per participant; bobyqa optimizer specified for convergence
mod_h5 = lmer(msg_share ~ article_cond * topic + (1 | SID),
              data = data_comment,
              control = lmerControl(optimizer = "bobyqa"))

plot

# Model-predicted sharing intentions by condition and topic (H5),
# with conditions ordered self / control / other for plotting
predicted_h5 = mod_h5 %>%
  ggeffects::ggpredict(terms = c("article_cond", "topic")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# Raw participant sharing intentions with columns renamed to match
# predicted_h5 (x = condition, predicted = rating, group = topic),
# used by the commented-out individual-trajectory layer below
ind_data_h5 = data_comment %>%
  rename(x = article_cond,
         predicted = msg_share,
         group = topic) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# H5 plot: predicted sharing intention by condition, line type = topic
# (not assigned to a variable; printed directly)
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted)) +
  # stat_summary(data = ind_data_h5, aes(group = SID, linetype = group),
  #              fun = "mean", geom = "line", size = .25, color = "grey50") +
  # condition means connected within topic
  stat_summary(aes(group = group, linetype = group),
               fun = "mean", geom = "line", size = 1.5) +
  # model-predicted point estimates with 95% CIs, colored by condition
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high, group = group),
                  size = 1.5) +
  # condition colors carry no legend; topic is distinguished by line type
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_linetype_manual(name = "", values = c("solid", "dotted")) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  # place the legend inside the lower-right of the panel area
  theme(legend.position = c(.85, .15))

model table

# Tidy fixed-effects table for H5 (b [95% CI], d, df, t, p) via table_model()
table_h5 = table_model(mod_h5)

table_h5
term b [95% CI] d df t p
intercept 28.06 [23.20, 32.92] 174.34 11.40 < .001
other 9.60 [6.64, 12.56] 0.10 2867.34 6.35 < .001
self 8.08 [5.15, 11.00] 0.08 2860.36 5.42 < .001
topichealth 1.04 [-1.95, 4.03] 0.09 2864.03 0.68 .497
other x topichealth 1.86 [-2.42, 6.14] -0.01 2872.33 0.85 .394
self x topichealth -1.24 [-5.52, 3.05] -0.03 2869.63 -0.57 .572

summary

# full lmerTest summary for H5 (Satterthwaite degrees of freedom)
summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_share ~ article_cond * topic + (1 | SID)
##    Data: data_comment
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 27601.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3081 -0.5742 -0.1144  0.4179  3.3886 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  SID      (Intercept) 629.3    25.08   
##  Residual             535.6    23.14   
## Number of obs: 2982, groups:  SID, 127
## 
## Fixed effects:
##                               Estimate Std. Error       df t value
## (Intercept)                     28.059      2.461  174.342  11.402
## article_condother                9.601      1.511 2867.340   6.354
## article_condself                 8.078      1.491 2860.361   5.418
## topichealth                      1.037      1.526 2864.032   0.680
## article_condother:topichealth    1.863      2.183 2872.332   0.853
## article_condself:topichealth    -1.236      2.184 2869.633  -0.566
##                                           Pr(>|t|)    
## (Intercept)                   < 0.0000000000000002 ***
## article_condother                   0.000000000244 ***
## article_condself                    0.000000065361 ***
## topichealth                                  0.497    
## article_condother:topichealth                0.394    
## article_condself:topichealth                 0.572    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) artcl_cndt artcl_cnds tpchlt artcl_cndt:
## artcl_cndth -0.300                                         
## artcl_cndsl -0.299  0.488                                  
## topichealth -0.297  0.501      0.506                       
## artcl_cndt:  0.217 -0.724     -0.355     -0.721            
## artcl_cnds:  0.214 -0.348     -0.715     -0.720  0.504

combined table

# table_h1a %>% mutate(DV = "H1a: Self-relevance") %>%
#   bind_rows(table_h1b %>% mutate(DV = "H1b: Social relevance")) %>%
#   bind_rows(table_h2a %>% mutate(DV = "H2a: Self-relevance")) %>%
#   bind_rows(table_h2b %>% mutate(DV = "H2b: Social relevance")) %>%
#   bind_rows(table_h3 %>% mutate(DV = "H3a-b: Sharing intention")) %>%
#   bind_rows(table_h4a %>% mutate(DV = "H4a: Self-referential ROI")) %>%
#   bind_rows(table_h4b %>% mutate(DV = "H4b: Mentalizing ROI")) %>%
#   bind_rows(table_h5 %>% mutate(DV = "H5: Sharing intention")) %>%
#   bind_rows(table_h6a %>% mutate(DV = "H6a: Sharing intention")) %>%
#   bind_rows(table_h6b %>% mutate(DV = "H6b: Sharing intention")) %>%
#   select(DV, everything()) %>%
#   kable() %>%
#   kable_styling()